In [1]:
!gdown --id 1yStEhGZSPW0tQZPSISjLQvTUK4DVU-1V
Downloading...
From: https://drive.google.com/uc?id=1yStEhGZSPW0tQZPSISjLQvTUK4DVU-1V
To: /content/flowers.zip
235MB [00:01, 227MB/s]
In [2]:
!unzip -q flowers.zip
replace flowers/daisy/100080576_f52e8ee070_n.jpg? [y]es, [n]o, [A]ll, [N]one, [r]ename: A
In [3]:
import os
print(os.listdir('/content/flowers'))
['sunflower', 'rose', 'tulip', 'dandelion', 'daisy']
In [6]:
# This Python 3 environment comes with many helpful analytics libraries installed
# It is defined by the kaggle/python Docker image: https://github.com/kaggle/docker-python
# For example, here's several helpful packages to load

import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)

# Input data files are available in the read-only "../input/" directory
# For example, running this (by clicking run or pressing Shift+Enter) will list all files under the input directory

import os

    

# You can write up to 20GB to the current directory (/kaggle/working/) that gets preserved as output when you create a version using "Save & Run All" 
# You can also write temporary files to /kaggle/temp/, but they won't be saved outside of the current session
In [7]:
categories=['sunflower', 'rose', 'tulip', 'dandelion', 'daisy']
In [8]:
dire='/content/flowers'
In [9]:
import matplotlib.pyplot as plt
import seaborn as sns
import cv2
features=[]
for i in categories:
    path=os.path.join(dire,i)
    num_classes=categories.index(i)
    for img in os.listdir(path):
        if img.endswith('.jpg'):
            
            img_array=cv2.imread(os.path.join(path,img),cv2.IMREAD_COLOR)
            img_array=cv2.resize(img_array,(150,150))
            features.append([img_array,num_classes])
In [10]:
X=[]
y=[]
for i,j in features:
    X.append(i)
    y.append(j)
In [11]:
import matplotlib.pyplot as plt
import seaborn as sns
fig,ax=plt.subplots(5,2)
fig.set_size_inches(15,15)
for i in range(5):
    for j in range (2):
        l=np.random.randint(0,len(y))
        ax[i,j].imshow(X[l])
        ax[i,j].set_title('Flower: '+categories[y[l]])
plt.axis('off')        
plt.tight_layout()
In [12]:
X=np.array(X).reshape(-1,150,150,3)/255.0
In [13]:
sns.set_style('whitegrid')
plt.figure(figsize=(14,7))
fig=sns.countplot(y)
fig.set(xticks=range(len(categories)), xticklabels=[i for i in categories])
plt.xlabel('FLOWER SPECIES')
plt.show()
/usr/local/lib/python3.7/dist-packages/seaborn/_decorators.py:43: FutureWarning: Pass the following variable as a keyword arg: x. From version 0.12, the only valid positional argument will be `data`, and passing other arguments without an explicit keyword will result in an error or misinterpretation.
  FutureWarning
In [14]:
list_dandelion=len([i for i in y if i==0])
list_daisy=len([i for i in y if i==1])
list_sunflower=len([i for i in y if i==2])
list_tulip=len([ i for i in y if i==3])
list_rose=len([i for i in y if i==4])
In [15]:
list_species=[list_dandelion,list_daisy,list_sunflower,list_tulip,list_rose]
In [16]:
sns.set_style('whitegrid')
plt.figure(figsize=(18,10))
plt.pie(list_species,labels=categories,startangle=90,colors=['r','g','b','y','m'],autopct='%1.1f%%',explode = (0, 0.1, 0, 0,0),shadow=True)
plt.legend()
plt.show()
In [17]:
from tensorflow.keras.utils import to_categorical
y=to_categorical(y)
In [18]:
from sklearn.model_selection import train_test_split
x_train,x_test,y_train,y_test=train_test_split(X,y,test_size=0.2,random_state=12)
In [20]:
from keras import backend as K
from keras.models import Sequential
from keras.layers import Dense
from keras.optimizers import Adam,SGD,Adagrad,Adadelta,RMSprop


# specifically for cnn
from keras.layers import Dropout, Flatten,Activation
from keras.layers import Conv2D, MaxPooling2D, BatchNormalization
 
import tensorflow as tf
from keras.preprocessing.image import ImageDataGenerator
In [21]:
model = Sequential()

model.add(Conv2D(32, (3, 3), input_shape=(150,150,3)))
model.add(Activation("relu"))
model.add(MaxPooling2D(2, 2, padding="same"))
model.add(Dropout(0.2))

model.add(Conv2D(64, (3, 3)))
model.add(Activation("relu"))
model.add(MaxPooling2D(2, 2, padding="same"))
model.add(Dropout(0.2))

model.add(Conv2D(128, (3, 3)))
model.add(Activation("relu"))
model.add(MaxPooling2D(2, 2, padding="same"))
model.add(Dropout(0.2))

model.add(Flatten())
model.add(Dense(512, activation="relu"))
model.add(Dropout(0.2))
model.add(Dense(128, activation="relu"))
model.add(Dropout(0.2))
model.add(Dense(5, activation="softmax"))
In [22]:
epochs=50

from keras.callbacks import ReduceLROnPlateau
red_lr= ReduceLROnPlateau(monitor='val_acc',patience=3,verbose=1,factor=0.1)
In [23]:
datagen = ImageDataGenerator(
        featurewise_center=False,  # set input mean to 0 over the dataset
        samplewise_center=False,  # set each sample mean to 0
        featurewise_std_normalization=False,  # divide inputs by std of the dataset
        samplewise_std_normalization=False,  # divide each input by its std
        zca_whitening=False,  # apply ZCA whitening
        rotation_range=10,  # randomly rotate images in the range (degrees, 0 to 180)
        zoom_range = 0.1, # Randomly zoom image 
        width_shift_range=0.2,  # randomly shift images horizontally (fraction of total width)
        height_shift_range=0.2,  # randomly shift images vertically (fraction of total height)
        horizontal_flip=True,  # randomly flip images
        vertical_flip=False)  # randomly flip images


datagen.fit(x_train)
In [24]:
model.compile(optimizer=Adam(lr=0.001),loss='categorical_crossentropy',metrics=['accuracy'])
/usr/local/lib/python3.7/dist-packages/tensorflow/python/keras/optimizer_v2/optimizer_v2.py:375: UserWarning: The `lr` argument is deprecated, use `learning_rate` instead.
  "The `lr` argument is deprecated, use `learning_rate` instead.")
In [25]:
model.summary()
Model: "sequential"
_________________________________________________________________
Layer (type)                 Output Shape              Param #   
=================================================================
conv2d (Conv2D)              (None, 148, 148, 32)      896       
_________________________________________________________________
activation (Activation)      (None, 148, 148, 32)      0         
_________________________________________________________________
max_pooling2d (MaxPooling2D) (None, 74, 74, 32)        0         
_________________________________________________________________
dropout (Dropout)            (None, 74, 74, 32)        0         
_________________________________________________________________
conv2d_1 (Conv2D)            (None, 72, 72, 64)        18496     
_________________________________________________________________
activation_1 (Activation)    (None, 72, 72, 64)        0         
_________________________________________________________________
max_pooling2d_1 (MaxPooling2 (None, 36, 36, 64)        0         
_________________________________________________________________
dropout_1 (Dropout)          (None, 36, 36, 64)        0         
_________________________________________________________________
conv2d_2 (Conv2D)            (None, 34, 34, 128)       73856     
_________________________________________________________________
activation_2 (Activation)    (None, 34, 34, 128)       0         
_________________________________________________________________
max_pooling2d_2 (MaxPooling2 (None, 17, 17, 128)       0         
_________________________________________________________________
dropout_2 (Dropout)          (None, 17, 17, 128)       0         
_________________________________________________________________
flatten (Flatten)            (None, 36992)             0         
_________________________________________________________________
dense (Dense)                (None, 512)               18940416  
_________________________________________________________________
dropout_3 (Dropout)          (None, 512)               0         
_________________________________________________________________
dense_1 (Dense)              (None, 128)               65664     
_________________________________________________________________
dropout_4 (Dropout)          (None, 128)               0         
_________________________________________________________________
dense_2 (Dense)              (None, 5)                 645       
=================================================================
Total params: 19,099,973
Trainable params: 19,099,973
Non-trainable params: 0
_________________________________________________________________
In [26]:
History = model.fit_generator(datagen.flow(x_train,y_train, batch_size=128),
                              epochs = epochs, validation_data = (x_test,y_test),
                              verbose = 1, steps_per_epoch=x_train.shape[0] // 128)
# model.fit(x_train,y_train,epochs=epochs,batch_size=batch_size,validation_data = (x_test,y_test))
/usr/local/lib/python3.7/dist-packages/keras/engine/training.py:1915: UserWarning: `Model.fit_generator` is deprecated and will be removed in a future version. Please use `Model.fit`, which supports generators.
  warnings.warn('`Model.fit_generator` is deprecated and '
Epoch 1/50
26/26 [==============================] - 64s 698ms/step - loss: 2.1361 - accuracy: 0.2427 - val_loss: 1.3429 - val_accuracy: 0.4213
Epoch 2/50
26/26 [==============================] - 17s 643ms/step - loss: 1.3612 - accuracy: 0.3940 - val_loss: 1.2135 - val_accuracy: 0.4792
Epoch 3/50
26/26 [==============================] - 16s 617ms/step - loss: 1.2723 - accuracy: 0.4340 - val_loss: 1.1423 - val_accuracy: 0.5116
Epoch 4/50
26/26 [==============================] - 16s 611ms/step - loss: 1.1748 - accuracy: 0.4851 - val_loss: 1.1320 - val_accuracy: 0.5475
Epoch 5/50
26/26 [==============================] - 17s 635ms/step - loss: 1.1261 - accuracy: 0.5221 - val_loss: 1.0422 - val_accuracy: 0.6030
Epoch 6/50
26/26 [==============================] - 17s 638ms/step - loss: 1.0983 - accuracy: 0.5548 - val_loss: 0.9695 - val_accuracy: 0.6227
Epoch 7/50
26/26 [==============================] - 16s 614ms/step - loss: 1.0395 - accuracy: 0.5816 - val_loss: 0.9475 - val_accuracy: 0.6296
Epoch 8/50
26/26 [==============================] - 16s 613ms/step - loss: 0.9732 - accuracy: 0.6130 - val_loss: 0.8555 - val_accuracy: 0.6597
Epoch 9/50
26/26 [==============================] - 17s 650ms/step - loss: 0.8995 - accuracy: 0.6324 - val_loss: 0.8862 - val_accuracy: 0.6562
Epoch 10/50
26/26 [==============================] - 16s 630ms/step - loss: 0.8872 - accuracy: 0.6596 - val_loss: 0.8386 - val_accuracy: 0.6701
Epoch 11/50
26/26 [==============================] - 17s 657ms/step - loss: 0.8627 - accuracy: 0.6638 - val_loss: 0.8301 - val_accuracy: 0.6921
Epoch 12/50
26/26 [==============================] - 17s 632ms/step - loss: 0.8665 - accuracy: 0.6738 - val_loss: 0.8165 - val_accuracy: 0.6817
Epoch 13/50
26/26 [==============================] - 16s 621ms/step - loss: 0.8357 - accuracy: 0.6798 - val_loss: 0.7758 - val_accuracy: 0.7002
Epoch 14/50
26/26 [==============================] - 17s 632ms/step - loss: 0.8236 - accuracy: 0.6809 - val_loss: 0.7461 - val_accuracy: 0.7245
Epoch 15/50
26/26 [==============================] - 17s 648ms/step - loss: 0.7964 - accuracy: 0.7016 - val_loss: 0.7297 - val_accuracy: 0.7188
Epoch 16/50
26/26 [==============================] - 16s 613ms/step - loss: 0.7964 - accuracy: 0.6936 - val_loss: 0.7301 - val_accuracy: 0.7106
Epoch 17/50
26/26 [==============================] - 16s 622ms/step - loss: 0.7846 - accuracy: 0.6823 - val_loss: 0.7879 - val_accuracy: 0.6887
Epoch 18/50
26/26 [==============================] - 17s 640ms/step - loss: 0.7884 - accuracy: 0.7065 - val_loss: 0.6874 - val_accuracy: 0.7338
Epoch 19/50
26/26 [==============================] - 17s 642ms/step - loss: 0.7171 - accuracy: 0.7090 - val_loss: 0.6846 - val_accuracy: 0.7523
Epoch 20/50
26/26 [==============================] - 16s 612ms/step - loss: 0.7119 - accuracy: 0.7268 - val_loss: 0.6731 - val_accuracy: 0.7477
Epoch 21/50
26/26 [==============================] - 16s 615ms/step - loss: 0.6956 - accuracy: 0.7353 - val_loss: 0.6885 - val_accuracy: 0.7315
Epoch 22/50
26/26 [==============================] - 17s 646ms/step - loss: 0.7318 - accuracy: 0.7389 - val_loss: 0.6493 - val_accuracy: 0.7581
Epoch 23/50
26/26 [==============================] - 17s 646ms/step - loss: 0.6968 - accuracy: 0.7195 - val_loss: 0.6819 - val_accuracy: 0.7384
Epoch 24/50
26/26 [==============================] - 16s 618ms/step - loss: 0.6657 - accuracy: 0.7554 - val_loss: 0.6604 - val_accuracy: 0.7627
Epoch 25/50
26/26 [==============================] - 16s 625ms/step - loss: 0.6477 - accuracy: 0.7590 - val_loss: 0.6787 - val_accuracy: 0.7627
Epoch 26/50
26/26 [==============================] - 17s 648ms/step - loss: 0.6774 - accuracy: 0.7343 - val_loss: 0.6472 - val_accuracy: 0.7627
Epoch 27/50
26/26 [==============================] - 16s 630ms/step - loss: 0.6587 - accuracy: 0.7391 - val_loss: 0.6514 - val_accuracy: 0.7569
Epoch 28/50
26/26 [==============================] - 17s 652ms/step - loss: 0.6143 - accuracy: 0.7712 - val_loss: 0.6604 - val_accuracy: 0.7500
Epoch 29/50
26/26 [==============================] - 16s 614ms/step - loss: 0.6136 - accuracy: 0.7703 - val_loss: 0.6572 - val_accuracy: 0.7558
Epoch 30/50
26/26 [==============================] - 16s 615ms/step - loss: 0.6293 - accuracy: 0.7649 - val_loss: 0.6096 - val_accuracy: 0.7755
Epoch 31/50
26/26 [==============================] - 17s 643ms/step - loss: 0.6141 - accuracy: 0.7705 - val_loss: 0.7132 - val_accuracy: 0.7257
Epoch 32/50
26/26 [==============================] - 16s 626ms/step - loss: 0.6097 - accuracy: 0.7673 - val_loss: 0.6431 - val_accuracy: 0.7662
Epoch 33/50
26/26 [==============================] - 16s 620ms/step - loss: 0.5684 - accuracy: 0.7903 - val_loss: 0.6748 - val_accuracy: 0.7512
Epoch 34/50
26/26 [==============================] - 16s 631ms/step - loss: 0.6032 - accuracy: 0.7678 - val_loss: 0.6176 - val_accuracy: 0.7801
Epoch 35/50
26/26 [==============================] - 17s 650ms/step - loss: 0.5814 - accuracy: 0.7866 - val_loss: 0.6679 - val_accuracy: 0.7662
Epoch 36/50
26/26 [==============================] - 16s 619ms/step - loss: 0.5923 - accuracy: 0.7749 - val_loss: 0.5880 - val_accuracy: 0.7836
Epoch 37/50
26/26 [==============================] - 16s 623ms/step - loss: 0.5284 - accuracy: 0.7985 - val_loss: 0.5821 - val_accuracy: 0.7882
Epoch 38/50
26/26 [==============================] - 18s 672ms/step - loss: 0.5582 - accuracy: 0.7851 - val_loss: 0.5853 - val_accuracy: 0.7951
Epoch 39/50
26/26 [==============================] - 17s 649ms/step - loss: 0.5458 - accuracy: 0.7920 - val_loss: 0.6236 - val_accuracy: 0.7674
Epoch 40/50
26/26 [==============================] - 16s 626ms/step - loss: 0.5304 - accuracy: 0.7929 - val_loss: 0.5852 - val_accuracy: 0.7928
Epoch 41/50
26/26 [==============================] - 17s 641ms/step - loss: 0.5107 - accuracy: 0.8130 - val_loss: 0.5815 - val_accuracy: 0.7917
Epoch 42/50
26/26 [==============================] - 17s 651ms/step - loss: 0.5281 - accuracy: 0.8011 - val_loss: 0.5848 - val_accuracy: 0.7870
Epoch 43/50
26/26 [==============================] - 17s 650ms/step - loss: 0.5203 - accuracy: 0.8021 - val_loss: 0.5900 - val_accuracy: 0.7824
Epoch 44/50
26/26 [==============================] - 17s 642ms/step - loss: 0.5106 - accuracy: 0.8154 - val_loss: 0.6125 - val_accuracy: 0.7847
Epoch 45/50
26/26 [==============================] - 17s 672ms/step - loss: 0.4952 - accuracy: 0.8115 - val_loss: 0.5966 - val_accuracy: 0.7778
Epoch 46/50
26/26 [==============================] - 17s 657ms/step - loss: 0.4835 - accuracy: 0.8085 - val_loss: 0.5770 - val_accuracy: 0.7940
Epoch 47/50
26/26 [==============================] - 17s 661ms/step - loss: 0.4874 - accuracy: 0.8114 - val_loss: 0.5565 - val_accuracy: 0.8021
Epoch 48/50
26/26 [==============================] - 16s 625ms/step - loss: 0.4666 - accuracy: 0.8261 - val_loss: 0.5804 - val_accuracy: 0.7940
Epoch 49/50
26/26 [==============================] - 16s 629ms/step - loss: 0.4626 - accuracy: 0.8204 - val_loss: 0.5745 - val_accuracy: 0.7928
Epoch 50/50
26/26 [==============================] - 17s 664ms/step - loss: 0.4701 - accuracy: 0.8293 - val_loss: 0.5552 - val_accuracy: 0.8056
In [27]:
sns.set_style('whitegrid')
plt.figure(figsize=(12,5))
plt.plot(History.history['loss'])
plt.plot(History.history['val_loss'])
plt.title('Model Loss')
plt.ylabel('Loss')
plt.xlabel('Epochs')
plt.legend(['train', 'test'])
plt.show()
In [28]:
sns.set_style('whitegrid')
plt.figure(figsize=(12,5))
plt.plot(History.history['accuracy'])
plt.plot(History.history['val_accuracy'])
plt.title('Model Accuracy')
plt.ylabel('Accuracy')
plt.xlabel('Epochs')
plt.legend(['train', 'test'])
plt.show()
In [29]:
preds=model.predict(x_test)
In [30]:
predictions=np.argmax(preds,axis=1)
In [31]:
correct_class=[]
incorrect_class=[]
i=0
for i in range(len(y_test)):
    if(np.argmax(y_test[i])==predictions[i]):
        correct_class.append(i)
    if(len(correct_class)==8):
        break
In [32]:
i=0
for i in range(len(y_test)):
    
    if (np.argmax(y_test[i])!=predictions[i]):
        
        incorrect_class.append(i)
    if (len(incorrect_class)==8):
        break
In [33]:
count=0
fig,ax=plt.subplots(4,2)
fig.set_size_inches(15,15)
for i in range (4):
    for j in range (2):
        ax[i,j].imshow(x_test[correct_class[count]])
        ax[i,j].set_title("Predicted Flower : "+ categories[predictions[correct_class[count]]] +"\n"+"Actual Flower : "+ categories[np.argmax(y_test[correct_class[count]])])
        plt.tight_layout()
        count+=1
In [34]:
count=0
fig,ax=plt.subplots(4,2)
fig.set_size_inches(15,15)
for i in range(4):
    for j in range(2):
        ax[i,j].imshow(x_test[incorrect_class[count]])
        ax[i,j].set_title("Predicted flower : " + categories[predictions[incorrect_class[count]]] + "\n"+"Actual Flower : " +categories[np.argmax(y_test[incorrect_class[count]])])
        plt.tight_layout()
        count+=1
In [35]:
import requests
from PIL import Image
from io import BytesIO

def process_image(url):
    response=requests.get(url)
    img=Image.open(BytesIO(response.content))
    fix,ax=plt.subplots(1,3,figsize=(15,20))
    ax[0].imshow(img)
    ax[0].set_title('image')
    
    #grayscale and normalization
    img=np.array(img)
    img=cv2.cvtColor(img,cv2.IMREAD_COLOR)
    print(img.shape)
    img=img/255.0
    ax[1].imshow(img)
    ax[1].set_title('color image')
    
    #resizing
    img=cv2.resize(img,(150,150))
    print(img.shape)
    ax[2].imshow(img)
    ax[2].set_title('predicted image')
    plt.tight_layout()
    img=np.expand_dims(img,axis=0)
    #making it model ready
    
    print(img.shape)
    return img
In [36]:
def predict(url):
    img=process_image(url)
    label=model.predict(img)
    final_1=np.argmax(label,axis=1)[0]
    plt.xlabel(categories[final_1])
    return categories[final_1]
In [37]:
predict("https://media4.picsearch.com/is?LwsQDsAhRnF2IV-PP61f1fCUcQWD2jYoz6X55V_6-dg&height=266") 
(266, 341, 3)
(150, 150, 3)
(1, 150, 150, 3)
Out[37]:
'daisy'
In [38]:
predict("https://media5.picsearch.com/is?8agnR1fAz2qzGkGmQsnFEb0nXkmuh-7hb-Il2rLLd7U&height=341")
(341, 255, 3)
(150, 150, 3)
(1, 150, 150, 3)
Out[38]:
'tulip'